{"schema":"libjg2-1", "vpath":"/git/", "avatar":"/git/avatar/", "alang":"en-US,en;q\u003d0.5", "gen_ut":1579888951, "reponame":"libwebsockets", "desc":"libwebsockets lightweight C networking library", "owner": { "name": "Andy Green", "email": "andy@warmcat.com", "md5": "c50933ca2aa61e0fe2c43d46bb6b59cb" },"url":"https://libwebsockets.org/repo/libwebsockets", "f":3, "items": [ {"schema":"libjg2-1", "cid":"0577d3738433b6c4aa6f463e4019ef0c", "oid":{ "oid": "934da8629112a57d11ace8796c82d2f8342825cb", "alias": [ "refs/heads/master"]},"blobname": "minimal-examples/ws-server/minimal-ws-server-ring/protocol_lws_minimal.c", "blob": "/*\n * ws protocol handler plugin for \u0022lws-minimal\u0022\n *\n * Written in 2010-2019 by Andy Green \u003candy@warmcat.com\u003e\n *\n * This file is made available under the Creative Commons CC0 1.0\n * Universal Public Domain Dedication.\n *\n * This version uses an lws_ring ringbuffer to cache up to 8 messages at a time,\n * so it's not so easy to lose messages.\n *\n * This also demonstrates how to \u0022cull\u0022, ie, kill, connections that can't\n * keep up for some reason.\n */\n\n#if !defined (LWS_PLUGIN_STATIC)\n#define LWS_DLL\n#define LWS_INTERNAL\n#include \u003clibwebsockets.h\u003e\n#endif\n\n#include \u003cstring.h\u003e\n\n/* one of these created for each message */\n\nstruct msg {\n\tvoid *payload; /* is malloc'd */\n\tsize_t len;\n};\n\n/* one of these is created for each client connecting to us */\n\nstruct per_session_data__minimal {\n\tstruct per_session_data__minimal *pss_list;\n\tstruct lws *wsi;\n\tuint32_t tail;\n\n\tunsigned int culled:1;\n};\n\n/* one of these is created for each vhost our protocol is used with */\n\nstruct per_vhost_data__minimal {\n\tstruct lws_context *context;\n\tstruct lws_vhost *vhost;\n\tconst struct lws_protocols *protocol;\n\n\tstruct per_session_data__minimal *pss_list; /* linked-list of live pss*/\n\n\tstruct lws_ring *ring; /* ringbuffer holding unsent messages */\n};\n\nstatic void\ncull_lagging_clients(struct per_vhost_data__minimal *vhd)\n{\n\tuint32_t oldest_tail \u003d lws_ring_get_oldest_tail(vhd-\u003ering);\n\tstruct per_session_data__minimal *old_pss \u003d NULL;\n\tint most \u003d 0, before \u003d lws_ring_get_count_waiting_elements(vhd-\u003ering,\n\t\t\t\t\t\u0026oldest_tail), m;\n\n\t/*\n\t * At least one guy with the oldest tail has lagged too far, filling\n\t * the ringbuffer with stuff waiting for them, while new stuff is\n\t * coming in, and they must close, freeing up ringbuffer entries.\n\t */\n\n\tlws_start_foreach_llp_safe(struct per_session_data__minimal **,\n\t\t\t ppss, vhd-\u003epss_list, pss_list) {\n\n\t\tif ((*ppss)-\u003etail \u003d\u003d oldest_tail) {\n\t\t\told_pss \u003d *ppss;\n\n\t\t\tlwsl_user(\u0022Killing lagging client %p\u005cn\u0022, (*ppss)-\u003ewsi);\n\n\t\t\tlws_set_timeout((*ppss)-\u003ewsi, PENDING_TIMEOUT_LAGGING,\n\t\t\t\t\t/*\n\t\t\t\t\t * we may kill the wsi we came in on,\n\t\t\t\t\t * so the actual close is deferred\n\t\t\t\t\t */\n\t\t\t\t\tLWS_TO_KILL_ASYNC);\n\n\t\t\t/*\n\t\t\t * We might try to write something before we get a\n\t\t\t * chance to close. But this pss is now detached\n\t\t\t * from the ring buffer. Mark this pss as culled so we\n\t\t\t * don't try to do anything more with it.\n\t\t\t */\n\n\t\t\t(*ppss)-\u003eculled \u003d 1;\n\n\t\t\t/*\n\t\t\t * Because we can't kill it synchronously, but we\n\t\t\t * know it's closing momentarily and don't want its\n\t\t\t * participation any more, remove its pss from the\n\t\t\t * vhd pss list early. 
(This is safe to repeat\n\t\t\t * uselessly later in the close flow).\n\t\t\t *\n\t\t\t * Notice this changes *ppss!\n\t\t\t */\n\n\t\t\tlws_ll_fwd_remove(struct per_session_data__minimal,\n\t\t\t\t\t pss_list, (*ppss), vhd-\u003epss_list);\n\n\t\t\t/* use the changed *ppss so we won't skip anything */\n\n\t\t\tcontinue;\n\n\t\t} else {\n\t\t\t/*\n\t\t\t * so this guy is a survivor of the cull. Let's track\n\t\t\t * what is the largest number of pending ring elements\n\t\t\t * for any survivor.\n\t\t\t */\n\t\t\tm \u003d lws_ring_get_count_waiting_elements(vhd-\u003ering,\n\t\t\t\t\t\t\t\u0026((*ppss)-\u003etail));\n\t\t\tif (m \u003e most)\n\t\t\t\tmost \u003d m;\n\t\t}\n\n\t} lws_end_foreach_llp_safe(ppss);\n\n\t/* it would mean we lost track of oldest... but Coverity insists */\n\tif (!old_pss)\n\t\treturn;\n\n\t/*\n\t * Let's recover (ie, free up) all the ring slots between the\n\t * original oldest's last one and the \u0022worst\u0022 survivor.\n\t */\n\n\tlws_ring_consume_and_update_oldest_tail(vhd-\u003ering,\n\t\tstruct per_session_data__minimal, \u0026old_pss-\u003etail, before - most,\n\t\tvhd-\u003epss_list, tail, pss_list);\n\n\tlwsl_user(\u0022%s: shrunk ring from %d to %d\u005cn\u0022, __func__, before, most);\n}\n\n/* destroys the message when everyone has had a copy of it */\n\nstatic void\n__minimal_destroy_message(void *_msg)\n{\n\tstruct msg *msg \u003d _msg;\n\n\tfree(msg-\u003epayload);\n\tmsg-\u003epayload \u003d NULL;\n\tmsg-\u003elen \u003d 0;\n}\n\nstatic int\ncallback_minimal(struct lws *wsi, enum lws_callback_reasons reason,\n\t\t\tvoid *user, void *in, size_t len)\n{\n\tstruct per_session_data__minimal *pss \u003d\n\t\t\t(struct per_session_data__minimal *)user;\n\tstruct per_vhost_data__minimal *vhd \u003d\n\t\t\t(struct per_vhost_data__minimal *)\n\t\t\tlws_protocol_vh_priv_get(lws_get_vhost(wsi),\n\t\t\t\t\tlws_get_protocol(wsi));\n\tconst struct msg *pmsg;\n\tstruct msg amsg;\n\tint n, m;\n\n\tswitch (reason) {\n\tcase LWS_CALLBACK_PROTOCOL_INIT:\n\t\tvhd \u003d lws_protocol_vh_priv_zalloc(lws_get_vhost(wsi),\n\t\t\t\tlws_get_protocol(wsi),\n\t\t\t\tsizeof(struct per_vhost_data__minimal));\n\t\tvhd-\u003econtext \u003d lws_get_context(wsi);\n\t\tvhd-\u003eprotocol \u003d lws_get_protocol(wsi);\n\t\tvhd-\u003evhost \u003d lws_get_vhost(wsi);\n\n\t\tvhd-\u003ering \u003d lws_ring_create(sizeof(struct msg), 8,\n\t\t\t\t\t __minimal_destroy_message);\n\t\tif (!vhd-\u003ering)\n\t\t\treturn 1;\n\t\tbreak;\n\n\tcase LWS_CALLBACK_PROTOCOL_DESTROY:\n\t\tlws_ring_destroy(vhd-\u003ering);\n\t\tbreak;\n\n\tcase LWS_CALLBACK_ESTABLISHED:\n\t\t/* add ourselves to the list of live pss held in the vhd */\n\t\tlwsl_user(\u0022LWS_CALLBACK_ESTABLISHED: wsi %p\u005cn\u0022, wsi);\n\t\tlws_ll_fwd_insert(pss, pss_list, vhd-\u003epss_list);\n\t\tpss-\u003etail \u003d lws_ring_get_oldest_tail(vhd-\u003ering);\n\t\tpss-\u003ewsi \u003d wsi;\n\t\tbreak;\n\n\tcase LWS_CALLBACK_CLOSED:\n\t\tlwsl_user(\u0022LWS_CALLBACK_CLOSED: wsi %p\u005cn\u0022, wsi);\n\t\t/* remove our closing pss from the list of live pss */\n\t\tlws_ll_fwd_remove(struct per_session_data__minimal, pss_list,\n\t\t\t\t pss, vhd-\u003epss_list);\n\t\tbreak;\n\n\tcase LWS_CALLBACK_SERVER_WRITEABLE:\n\t\tif (pss-\u003eculled)\n\t\t\tbreak;\n\t\tpmsg \u003d lws_ring_get_element(vhd-\u003ering, \u0026pss-\u003etail);\n\t\tif (!pmsg)\n\t\t\tbreak;\n\n\t\t/* notice we allowed for LWS_PRE in the payload already */\n\t\tm \u003d lws_write(wsi, ((unsigned char *)pmsg-\u003epayload) +\n\t\t\t LWS_PRE, pmsg-\u003elen, 
LWS_WRITE_TEXT);\n\t\tif (m \u003c (int)pmsg-\u003elen) {\n\t\t\tlwsl_err(\u0022ERROR %d writing to ws socket\u005cn\u0022, m);\n\t\t\treturn -1;\n\t\t}\n\n\t\tlws_ring_consume_and_update_oldest_tail(\n\t\t\tvhd-\u003ering,\t/* lws_ring object */\n\t\t\tstruct per_session_data__minimal, /* type of objects with tails */\n\t\t\t\u0026pss-\u003etail,\t/* tail of guy doing the consuming */\n\t\t\t1,\t\t/* number of payload objects being consumed */\n\t\t\tvhd-\u003epss_list,\t/* head of list of objects with tails */\n\t\t\ttail,\t\t/* member name of tail in objects with tails */\n\t\t\tpss_list\t/* member name of next object in objects with tails */\n\t\t);\n\n\t\t/* more to do for us? */\n\t\tif (lws_ring_get_element(vhd-\u003ering, \u0026pss-\u003etail))\n\t\t\t/* come back as soon as we can write more */\n\t\t\tlws_callback_on_writable(pss-\u003ewsi);\n\t\tbreak;\n\n\tcase LWS_CALLBACK_RECEIVE:\n\t\tn \u003d (int)lws_ring_get_count_free_elements(vhd-\u003ering);\n\t\tif (!n) {\n\t\t\t/* forcibly make space */\n\t\t\tcull_lagging_clients(vhd);\n\t\t\tn \u003d (int)lws_ring_get_count_free_elements(vhd-\u003ering);\n\t\t}\n\t\tif (!n)\n\t\t\tbreak;\n\n\t\tlwsl_user(\u0022LWS_CALLBACK_RECEIVE: free space %d\u005cn\u0022, n);\n\n\t\tamsg.len \u003d len;\n\t\t/* notice we over-allocate by LWS_PRE... */\n\t\tamsg.payload \u003d malloc(LWS_PRE + len);\n\t\tif (!amsg.payload) {\n\t\t\tlwsl_user(\u0022OOM: dropping\u005cn\u0022);\n\t\t\tbreak;\n\t\t}\n\n\t\t/* ...and we copy the payload in at +LWS_PRE */\n\t\tmemcpy((char *)amsg.payload + LWS_PRE, in, len);\n\t\tif (!lws_ring_insert(vhd-\u003ering, \u0026amsg, 1)) {\n\t\t\t__minimal_destroy_message(\u0026amsg);\n\t\t\tlwsl_user(\u0022dropping!\u005cn\u0022);\n\t\t\tbreak;\n\t\t}\n\n\t\t/*\n\t\t * let everybody know we want to write something on them\n\t\t * as soon as they are ready\n\t\t */\n\t\tlws_start_foreach_llp(struct per_session_data__minimal **,\n\t\t\t\t ppss, vhd-\u003epss_list) {\n\t\t\tlws_callback_on_writable((*ppss)-\u003ewsi);\n\t\t} lws_end_foreach_llp(ppss, pss_list);\n\t\tbreak;\n\n\tdefault:\n\t\tbreak;\n\t}\n\n\treturn 0;\n}\n\n#define LWS_PLUGIN_PROTOCOL_MINIMAL \u005c\n\t{ \u005c\n\t\t\u0022lws-minimal\u0022, \u005c\n\t\tcallback_minimal, \u005c\n\t\tsizeof(struct per_session_data__minimal), \u005c\n\t\t0, \u005c\n\t\t0, NULL, 0 \u005c\n\t}\n\n#if !defined (LWS_PLUGIN_STATIC)\n\n/* boilerplate needed if we are built as a dynamic plugin */\n\nstatic const struct lws_protocols protocols[] \u003d {\n\tLWS_PLUGIN_PROTOCOL_MINIMAL\n};\n\nint\ninit_protocol_minimal(struct lws_context *context,\n\t\t struct lws_plugin_capability *c)\n{\n\tif (c-\u003eapi_magic !\u003d LWS_PLUGIN_API_MAGIC) {\n\t\tlwsl_err(\u0022Plugin API %d, library API %d\u0022, LWS_PLUGIN_API_MAGIC,\n\t\t\t c-\u003eapi_magic);\n\t\treturn 1;\n\t}\n\n\tc-\u003eprotocols \u003d protocols;\n\tc-\u003ecount_protocols \u003d LWS_ARRAY_SIZE(protocols);\n\tc-\u003eextensions \u003d NULL;\n\tc-\u003ecount_extensions \u003d 0;\n\n\treturn 0;\n}\n\nint\ndestroy_protocol_minimal(struct lws_context *context)\n{\n\treturn 0;\n}\n#endif\n","s":{"c":1579888951,"u": 513}} ],"g": 3504,"chitpc": 0,"ehitpc": 0, "indexed":0 }
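When LWS_PLUGIN_STATIC is defined, the #if guards above skip the dynamic-plugin boilerplate and the file can be #include'd directly into an application. Below is a minimal sketch of that static wiring, modelled on the pattern the other lws minimal examples follow; the listen port, the placeholder "http" protocol entry and the signal handling are illustrative assumptions here, not part of the file above.

/*
 * Sketch: consuming the protocol statically (assumed wiring for
 * illustration; the port and the http placeholder are arbitrary choices).
 */
#include <libwebsockets.h>
#include <signal.h>
#include <string.h>

#define LWS_PLUGIN_STATIC
#include "protocol_lws_minimal.c"

static struct lws_protocols protocols[] = {
	{ "http", lws_callback_http_dummy, 0, 0 }, /* placeholder http handler */
	LWS_PLUGIN_PROTOCOL_MINIMAL,		   /* the protocol defined above */
	{ NULL, NULL, 0, 0 }			   /* terminator */
};

static int interrupted;

static void
sigint_handler(int sig)
{
	interrupted = 1;
}

int
main(void)
{
	struct lws_context_creation_info info;
	struct lws_context *context;
	int n = 0;

	signal(SIGINT, sigint_handler);

	memset(&info, 0, sizeof(info));
	info.port = 7681;		/* assumed listen port */
	info.protocols = protocols;

	context = lws_create_context(&info);
	if (!context) {
		lwsl_err("lws init failed\n");
		return 1;
	}

	/* run the event loop until SIGINT */
	while (n >= 0 && !interrupted)
		n = lws_service(context, 0);

	lws_context_destroy(context);

	return 0;
}

The design point worth noting: the vhost owns a single lws_ring holding each message exactly once, every connection keeps only a tail index into it, and lws_ring_consume_and_update_oldest_tail() frees a slot only once the slowest surviving consumer has passed it, while cull_lagging_clients() bounds how long a slow consumer can pin the buffer.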