{"schema":"libjg2-1", "vpath":"/git/", "avatar":"/git/avatar/", "alang":"", "gen_ut":1752330899, "reponame":"libwebsockets", "desc":"libwebsockets lightweight C networking library", "owner": { "name": "Andy Green", "email": "andy@warmcat.com", "md5": "c50933ca2aa61e0fe2c43d46bb6b59cb" },"url":"https://libwebsockets.org/repo/libwebsockets", "f":3, "items": [ {"schema":"libjg2-1", "cid":"982d184414d51e3779e88626e319410b", "oid":{ "oid": "028d4dc2fb59dd90a6997eea04bcaed899adb0fe", "alias": [ "refs/heads/main"]},"blobname": "include/libwebsockets/lws-tokenize.h", "blob": "/*\n * libwebsockets - small server side websockets and web server implementation\n *\n * Copyright (C) 2010 - 2019 Andy Green \u003candy@warmcat.com\u003e\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \u0022Software\u0022), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \u0022AS IS\u0022, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* Do not treat - as a terminal character, so \u0022my-token\u0022 is one token */\n#define LWS_TOKENIZE_F_MINUS_NONTERM\t(1 \u003c\u003c 0)\n/* Separately report aggregate colon-delimited tokens */\n#define LWS_TOKENIZE_F_AGG_COLON\t(1 \u003c\u003c 1)\n/* Enforce sequencing for a simple token , token , token ... list */\n#define LWS_TOKENIZE_F_COMMA_SEP_LIST\t(1 \u003c\u003c 2)\n/* Allow more characters in the tokens and less delimiters... default is\n * only alphanumeric + underscore in tokens */\n#define LWS_TOKENIZE_F_RFC7230_DELIMS\t(1 \u003c\u003c 3)\n/* Do not treat . 

typedef enum {

	LWS_TOKZE_ERRS			= 7, /* the number of errors defined */

	LWS_TOKZE_TOO_LONG		= -7,	/* token too long */
	LWS_TOKZE_WANT_READ		= -6,	/* need more input */
	LWS_TOKZE_ERR_BROKEN_UTF8	= -5,	/* malformed or partial utf8 */
	LWS_TOKZE_ERR_UNTERM_STRING	= -4,	/* ended while we were in "" */
	LWS_TOKZE_ERR_MALFORMED_FLOAT	= -3,	/* like 0..1 or 0.1.1 */
	LWS_TOKZE_ERR_NUM_ON_LHS	= -2,	/* like 123= or 0.1= */
	LWS_TOKZE_ERR_COMMA_LIST	= -1,	/* like ",tok", or, "tok,," */

	LWS_TOKZE_ENDED = 0,		/* no more content */

	/* Note: results have ordinal 1+, EOT is 0 and errors are < 0 */

	LWS_TOKZE_DELIMITER,		/* a delimiter appeared */
	LWS_TOKZE_TOKEN,		/* a token appeared */
	LWS_TOKZE_INTEGER,		/* an integer appeared */
	LWS_TOKZE_FLOAT,		/* a float appeared */
	LWS_TOKZE_TOKEN_NAME_EQUALS,	/* token [whitespace] = */
	LWS_TOKZE_TOKEN_NAME_COLON,	/* token [whitespace] : (only with
					   LWS_TOKENIZE_F_AGG_COLON flag) */
	LWS_TOKZE_QUOTED_STRING,	/* "*", where * may have any char */

} lws_tokenize_elem;

/*
 * helper enums to allow the caller to enforce legal delimiter sequencing, eg
 * disallow "token,,token", "token,", and ",token"
 */

enum lws_tokenize_delimiter_tracking {
	LWSTZ_DT_NEED_FIRST_CONTENT,
	LWSTZ_DT_NEED_DELIM,
	LWSTZ_DT_NEED_NEXT_CONTENT,
};

typedef enum {
	LWS_TOKZS_LEADING_WHITESPACE,
	LWS_TOKZS_QUOTED_STRING,
	LWS_TOKZS_TOKEN,
	LWS_TOKZS_TOKEN_POST_TERMINAL
} lws_tokenize_state;

typedef struct lws_tokenize {
	char collect[256]; /* token length limit */
	const char *start; /**< set to the start of the string to tokenize */
	const char *token; /**< the start of an identified token or delimiter */
	size_t len;	/**< set to the length of the string to tokenize */
	size_t token_len; /**< the length of the identified token or delimiter */

	lws_tokenize_state state;

	int line;
	int effline;

	uint16_t flags;	/**< optional LWS_TOKENIZE_F_ flags, or 0 */
	uint8_t delim;

	int8_t e; /**< convenient for storing lws_tokenize return */
	uint8_t reset_token:1;
	uint8_t crlf:1;
	uint8_t dry:1;
} lws_tokenize_t;

/**
 * lws_tokenize_init() - initialize a tokenize struct to point at a string
 *
 * \param ts: the lws_tokenize struct to init
 * \param start: the string to tokenize
 * \param flags: LWS_TOKENIZE_F_ option flags
 *
 * This initializes the tokenize struct to point to the given string, and
 * sets the length to 2GiB - 1 (so there must be a terminating NUL)... you can
 * override this requirement by setting ts.len yourself before using it.
 *
 * .delim is also initialized to LWSTZ_DT_NEED_FIRST_CONTENT.
 */

LWS_VISIBLE LWS_EXTERN void
lws_tokenize_init(struct lws_tokenize *ts, const char *start, int flags);

/**
 * lws_tokenize() - breaks down a string into tokens and delimiters in-place
 *
 * \param ts: the lws_tokenize struct with information and state on what to do
 *
 * The \p ts struct should have its start, len and flags members initialized to
 * reflect the string to be tokenized and any options.
 *
 * Then `lws_tokenize()` may be called repeatedly on the struct, returning one
 * of `lws_tokenize_elem` each time, and with the struct's `token` and
 * `token_len` members set to describe the content of the delimiter or token
 * payload each time.
 *
 * There are no allocations during the process.
 *
 * Returns the lws_tokenize_elem that was identified (LWS_TOKZE_ENDED means
 * the end of the string was reached).
 */

LWS_VISIBLE LWS_EXTERN lws_tokenize_elem
lws_tokenize(struct lws_tokenize *ts);

/**
 * lws_tokenize_cstr() - copy token string to NUL-terminated buffer
 *
 * \param ts: pointer to lws_tokenize struct to operate on
 * \param str: destination buffer
 * \param max: bytes in destination buffer
 *
 * Returns 0 if OK or nonzero if the string + NUL won't fit.
 */

LWS_VISIBLE LWS_EXTERN int
lws_tokenize_cstr(struct lws_tokenize *ts, char *str, size_t max);
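
/*
 * Usage sketch (illustrative only, not part of the original header): walking
 * a comma-separated list with the api above.  The flag, enum and function
 * names are from this header; the input string and printf() are arbitrary.
 *
 *	struct lws_tokenize ts;
 *	lws_tokenize_elem e;
 *	char buf[64];
 *
 *	lws_tokenize_init(&ts, "x-forwarded-for, proxy1, proxy2",
 *			  LWS_TOKENIZE_F_COMMA_SEP_LIST |
 *			  LWS_TOKENIZE_F_MINUS_NONTERM);
 *
 *	do {
 *		e = lws_tokenize(&ts);
 *		if (e == LWS_TOKZE_TOKEN &&
 *		    !lws_tokenize_cstr(&ts, buf, sizeof(buf)))
 *			printf("token: %s\n", buf);
 *	} while (e > LWS_TOKZE_ENDED);
 *
 * Positive returns are tokens or delimiters, LWS_TOKZE_ENDED (0) means the
 * whole string was consumed, and negative returns are the error cases listed
 * in lws_tokenize_elem.
 */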

/*
 * lws_strexp: flexible string expansion helper api
 *
 * This stateful helper can handle multiple separate input chunks and multiple
 * output buffer loads with arbitrary boundaries between literals and expanded
 * symbols.  This allows it to handle fragmented input as well as arbitrarily
 * long symbol expansions that are bigger than the output buffer itself.
 *
 * A user callback is used to convert symbol names to the symbol value.
 *
 * Because of this, even a single-byte input buffer and a single-byte output
 * buffer are enough to process substitutions of any length.  The state object
 * is around 64 bytes on a 64-bit system and it only uses 8 bytes of stack.
 */


typedef int (*lws_strexp_expand_cb)(void *priv, const char *name, char *out,
				    size_t *pos, size_t olen, size_t *exp_ofs);

typedef struct lws_strexp {
	char			name[32];
	lws_strexp_expand_cb	cb;
	void			*priv;
	char			*out;
	size_t			olen;
	size_t			pos;

	size_t			exp_ofs;

	uint8_t			name_pos;
	char			state;
} lws_strexp_t;

enum {
	LSTRX_DONE,			/* it completed OK */
	LSTRX_FILLED_OUT,		/* out buf filled and needs resetting */
	LSTRX_FATAL_NAME_TOO_LONG = -1,	/* fatal */
	LSTRX_FATAL_NAME_UNKNOWN  = -2,
};


/**
 * lws_strexp_init() - initialize an lws_strexp_t for use
 *
 * \p exp: the exp object to init
 * \p priv: the user's object pointer to pass to the callback
 * \p cb: the callback to expand named objects
 * \p out: the start of the output buffer, or NULL just to get the length
 * \p olen: the length of the output buffer in bytes
 *
 * Prepares an lws_strexp_t for use and sets the initial output buffer.
 *
 * If \p out is NULL, substitution proceeds normally, but no output is produced,
 * only the length is returned.  \p olen should be set to the largest feasible
 * overall length.  To use this mode, the substitution callback must also check
 * for NULL \p out and avoid producing the output.
 */
LWS_VISIBLE LWS_EXTERN void
lws_strexp_init(lws_strexp_t *exp, void *priv, lws_strexp_expand_cb cb,
		char *out, size_t olen);

/**
 * lws_strexp_reset_out() - reset the output buffer on an existing strexp
 *
 * \p exp: the exp object to reset
 * \p out: the start of the new output buffer, or NULL to just get the length
 * \p olen: the length of the output buffer in bytes
 *
 * Provides a new output buffer for lws_strexp_expand() to continue to write
 * into.  It can be the same as the old one if it has been copied out or used.
 * The position of the next write will be reset to the start of the given buf.
 *
 * If \p out is NULL, substitution proceeds normally, but no output is produced,
 * only the length is returned.  \p olen should be set to the largest feasible
 * overall length.  To use this mode, the substitution callback must also check
 * for NULL \p out and avoid producing the output.
 */
LWS_VISIBLE LWS_EXTERN void
lws_strexp_reset_out(lws_strexp_t *exp, char *out, size_t olen);

/**
 * lws_strexp_expand() - copy / expand a string into the output buffer
 *
 * \p exp: the exp object for the copy / expansion
 * \p in: the start of the next input data
 * \p len: the length of the input data
 * \p pused_in: pointer to write the amount of input used
 * \p pused_out: pointer to write the amount of output used
 *
 * Copies \p in to the output buffer set in \p exp, expanding any ${name}
 * tokens using the callback.  \p *pused_in is set to the number of input
 * chars used and \p *pused_out to the number of output characters used.
 *
 * May return LSTRX_FILLED_OUT early, with \p *pused_in less than \p len, if
 * the output buffer is filled.  Handle the output buffer contents and reset
 * the buffer with lws_strexp_reset_out() before calling again with adjusted
 * in / len to continue.
 *
 * In the case of large expansions, the expansion itself may fill the output
 * buffer, in which case the expansion callback returns LSTRX_FILLED_OUT and
 * will be called again to continue, with its *exp_ofs parameter set
 * appropriately.
 */
LWS_VISIBLE LWS_EXTERN int
lws_strexp_expand(lws_strexp_t *exp, const char *in, size_t len,
		  size_t *pused_in, size_t *pused_out);
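
/*
 * Usage sketch (illustrative only, not part of the original header): expanding
 * "hello ${user}!" with a callback that only knows the symbol "user".  The
 * callback follows the contract described above: it copies as much of the
 * value as fits at *pos, tracks its own progress in *exp_ofs so it can be
 * resumed, honours NULL out for length-only mode, and returns LSTRX_FILLED_OUT
 * when it runs out of room or LSTRX_DONE when the whole value has been
 * emitted.  my_expand_cb and the literal strings are arbitrary names used
 * only for this sketch.
 *
 *	static int
 *	my_expand_cb(void *priv, const char *name, char *out, size_t *pos,
 *		     size_t olen, size_t *exp_ofs)
 *	{
 *		const char *val = "andy";
 *		size_t vl = strlen(val);
 *
 *		if (strcmp(name, "user"))
 *			return LSTRX_FATAL_NAME_UNKNOWN;
 *
 *		while (*exp_ofs < vl) {
 *			if (*pos >= olen)
 *				return LSTRX_FILLED_OUT;
 *			if (out)
 *				out[*pos] = val[*exp_ofs];
 *			(*pos)++;
 *			(*exp_ofs)++;
 *		}
 *
 *		return LSTRX_DONE;
 *	}
 *
 *	...
 *
 *	lws_strexp_t exp;
 *	char obuf[128];
 *	size_t used_in, used_out;
 *	const char *in = "hello ${user}!";
 *
 *	lws_strexp_init(&exp, NULL, my_expand_cb, obuf, sizeof(obuf));
 *	if (lws_strexp_expand(&exp, in, strlen(in), &used_in, &used_out) ==
 *	    LSTRX_DONE)
 *		printf("%.*s\n", (int)used_out, obuf);
 */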

/**
 * lws_strcmp_wildcard() - strcmp but the first arg can have wildcards
 *
 * \p wildcard: a string that may contain zero to three *, and may lack a NUL
 * \p wlen: length of the wildcard string
 * \p check: string to test to see if it matches the wildcard
 * \p clen: length of the check string
 *
 * Like strcmp, but supports patterns like "a*", "a*b", "a*b*" etc, where a
 * and b are arbitrary substrings.  Neither the wildcard nor the check string
 * needs to be NUL terminated; both are specified by length.
 */
LWS_VISIBLE LWS_EXTERN int
lws_strcmp_wildcard(const char *wildcard, size_t wlen, const char *check,
		    size_t clen);
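
/*
 * Usage sketch (illustrative only, not part of the original header): checking
 * a filename against an "a*b" style pattern.  As with strcmp(), a return of 0
 * means the strings match; the pattern and name below are arbitrary.
 *
 *	const char *pat = "lws-*.c", *name = "lws-tokenize.c";
 *
 *	if (!lws_strcmp_wildcard(pat, strlen(pat), name, strlen(name)))
 *		printf("%s matches %s\n", name, pat);
 */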