/*
 * Functions managing applets
 *
 * Copyright 2000-2015 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <stdio.h>
#include <stdlib.h>

#include <haproxy/api.h>
#include <haproxy/applet.h>
#include <haproxy/channel.h>
#include <haproxy/list.h>
#include <haproxy/stream.h>
#include <haproxy/stream_interface.h>
#include <haproxy/task.h>

unsigned int nb_applets = 0;

DECLARE_POOL(pool_head_appctx, "appctx", sizeof(struct appctx));
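
/* Usage sketch: appctx structures are typically carved out of the pool
 * declared above, roughly as follows (assuming the generic pool helpers
 * pool_alloc()/pool_free()):
 *
 *     struct appctx *appctx = pool_alloc(pool_head_appctx);
 *     if (appctx) {
 *             ... initialize and use the applet context ...
 *             pool_free(pool_head_appctx, appctx);
 *     }
 */
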
/* Callback used to wake up an applet when a buffer is available. The applet
 * <appctx> is woken up if an input buffer was requested for the associated
 * stream interface. In this case the buffer is immediately allocated and the
 * function returns 1. Otherwise it returns 0. Note that this automatically
 * covers multiple wake-up attempts by ensuring that the same buffer will not
 * be accounted for multiple times.
 */
int appctx_buf_available(void *arg)
{
	struct appctx *appctx = arg;
	struct stream_interface *si = cs_si(appctx->owner);

	/* allocation requested ? */
	if (!(si->flags & SI_FL_RXBLK_BUFF))
		return 0;

	si_rx_buff_rdy(si);

	/* was already allocated another way ? if so, don't take this one */
	if (c_size(si_ic(si)) || si_ic(si)->pipe)
		return 0;

	/* allocation possible now ? */
	if (!b_alloc(&si_ic(si)->buf)) {
		si_rx_buff_blk(si);
		return 0;
	}

	task_wakeup(appctx->t, TASK_WOKEN_RES);
	return 1;
}
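
/* Registration sketch: this callback is meant to be wired into the applet's
 * buffer_wait structure so the buffer allocator can call it back once memory
 * becomes available again. Assuming the buffer_wait fields are named <target>
 * and <wakeup_cb> as elsewhere in the tree, the setup looks roughly like:
 *
 *     appctx->buffer_wait.target = appctx;
 *     appctx->buffer_wait.wakeup_cb = appctx_buf_available;
 */
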
/* Default applet handler */
struct task *task_run_applet(struct task *t, void *context, unsigned int state)
{
	struct appctx *app = context;
	struct stream_interface *si;
	unsigned int rate;
	size_t count;

	if (app->state & APPLET_WANT_DIE) {
		__appctx_free(app);
		return NULL;
	}

	si = cs_si(app->owner);

	/* We always pretend the applet can't get and doesn't want to
	 * put, it's up to it to change this if needed. This ensures
	 * that one applet which ignores any event will not spin.
	 */
	si_cant_get(si);
	si_rx_endp_done(si);

	/* Now we'll try to allocate the input buffer. We wake up the applet in
	 * all cases. So this is the applet's responsibility to check if this
	 * buffer was allocated or not. This leaves a chance for applets to do
	 * some other processing if needed. The applet doesn't have anything to
	 * do if it needs the buffer, it will be called again upon readiness.
	 */
	if (!si_alloc_ibuf(si, &app->buffer_wait))
		si_rx_endp_more(si);

	count = co_data(si_oc(si));
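	/* call the applet's I/O handler; comparing <count> taken above with
	 * the output data afterwards tells whether it consumed anything.
	 */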
	app->applet->fct(app);

	/* now check if the applet has released some room and forgot to
	 * notify the other side about it.
	 */
	if (count != co_data(si_oc(si))) {
		si_oc(si)->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
		si_rx_room_rdy(si_opposite(si));
	}

	/* measure the call rate and check for anomalies when too high */
	rate = update_freq_ctr(&app->call_rate, 1);
	if (rate >= 100000 && app->call_rate.prev_ctr && // looped more than 100k times over last second
	    ((b_size(si_ib(si)) && si->flags & SI_FL_RXBLK_BUFF) || // asks for a buffer which is present
	     (b_size(si_ib(si)) && !b_data(si_ib(si)) && si->flags & SI_FL_RXBLK_ROOM) || // asks for room in an empty buffer
	     (b_data(si_ob(si)) && si_tx_endp_ready(si) && !si_tx_blocked(si)) || // asks for data already present
	     (!b_data(si_ib(si)) && b_data(si_ob(si)) && // didn't return anything ...
	      (si_oc(si)->flags & (CF_WRITE_PARTIAL|CF_SHUTW_NOW)) == CF_SHUTW_NOW))) { // ... and left data pending after a shut
		stream_dump_and_crash(&app->obj_type, read_freq_ctr(&app->call_rate));
	}
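
	/* stream_dump_and_crash() above is expected to log the offending
	 * stream's state and abort the process, turning a spinning applet
	 * into an immediately visible failure rather than a silent busy loop.
	 */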

	si_applet_wake_cb(si);
	channel_release_buffer(si_ic(si), &app->buffer_wait);
	return t;
}
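
/* Wiring sketch: the default handler above runs as the <process> function of
 * the applet's task, with the appctx as its context. Assuming the usual task
 * fields <process> and <context>, the association is roughly:
 *
 *     appctx->t->process = task_run_applet;
 *     appctx->t->context = appctx;
 *     task_wakeup(appctx->t, TASK_WOKEN_INIT);
 */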