
The sock structure

Source: 图灵教育
Time: 2023-05-30 09:33:26

/**
 * struct sock - network layer representation of sockets
 * @__sk_common: shared layout with inet_timewait_sock
 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 * @sk_lock: synchronizer
 * @sk_rcvbuf: size of receive buffer in bytes
 * @sk_wq: sock wait queue and async head
 * @sk_rx_dst: receive input route used by early demux
 * @sk_dst_cache: destination cache
 * @sk_policy: flow policy
 * @sk_receive_queue: incoming packets
 * @sk_wmem_alloc: transmit queue bytes committed
 * @sk_write_queue: Packet sending queue
 * @sk_omem_alloc: "o" is "option" or "other"
 * @sk_wmem_queued: persistent queue size
 * @sk_forward_alloc: space allocated forward
 * @sk_napi_id: id of the last napi context to receive data for sk
 * @sk_ll_usec: usecs to busypoll when there is no data
 * @sk_allocation: allocation mode
 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
 * @sk_sndbuf: size of send buffer in bytes
 * @sk_padding: unused element for alignment
 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
 * @sk_no_check_rx: allow zero checksum in RX packets
 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 * @sk_gso_max_size: Maximum GSO segment size to build
 * @sk_gso_max_segs: Maximum number of GSO segments
 * @sk_lingertime: %SO_LINGER l_linger setting
 * @sk_backlog: always used with the per-socket spinlock held
 * @sk_callback_lock: used with the callbacks in the end of this struct
 * @sk_error_queue: rarely used
 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *                   IPV6_ADDRFORM for instance)
 * @sk_err: last error
 * @sk_err_soft: errors that don't cause failure but are the cause of a
 *               persistent failure not just 'timed out'
 * @sk_drops: raw/udp drops counter
 * @sk_ack_backlog: current listen backlog
 * @sk_max_ack_backlog: listen backlog set in listen()
 * @sk_priority: %SO_PRIORITY setting
 * @sk_type: socket type (%SOCK_STREAM, etc)
 * @sk_protocol: which protocol this socket belongs in this network family
 * @sk_peer_pid: &struct pid for this socket's peer
 * @sk_peer_cred: %SO_PEERCRED setting
 * @sk_rcvlowat: %SO_RCVLOWAT setting
 * @sk_rcvtimeo: %SO_RCVTIMEO setting
 * @sk_sndtimeo: %SO_SNDTIMEO setting
 * @sk_txhash: computed flow hash for use on transmit
 * @sk_filter: socket filtering instructions
 * @sk_timer: sock cleanup timer
 * @sk_stamp: time stamp of last packet received
 * @sk_tsflags: SO_TIMESTAMPING socket options
 * @sk_tskey: counter to disambiguate concurrent tstamp requests
 * @sk_socket: Identd and reporting IO signals
 * @sk_user_data: RPC layer private data
 * @sk_frag: cached page frag
 * @sk_peek_off: current peek_offset value
 * @sk_send_head: front of stuff to transmit
 * @sk_security: used by security modules
 * @sk_mark: generic packet mark
 * @sk_cgrp_data: cgroup data for this cgroup
 * @sk_memcg: this socket's memory cgroup association
 * @sk_write_pending: a write to stream socket waits to start
 * @sk_state_change: callback to indicate change in the state of the sock
 * @sk_data_ready: callback to indicate there is data to be processed
 * @sk_write_space: callback to indicate there is bf sending space available
 * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 * @sk_backlog_rcv: callback to process the backlog
 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 * @sk_reuseport_cb: reuseport group container
 * @sk_rcu: used during RCU grace period
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add nothing before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case it's implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because its logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;

	__u32			sk_txhash;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_napi_id;
	unsigned int		sk_ll_usec;
#endif
	atomic_t		sk_drops;
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		struct socket_wq	*sk_wq_raw;
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	/* Note: 32bit hole on 64bit arches */
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_write_queue;

	/*
	 * Because of non atomicity rules, all
	 * changes are protected by socket lock.
	 */
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_padding : 2,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
#define SK_PROTOCOL_MAX U8_MAX
	kmemcheck_bitfield_end(flags);

	int			sk_wmem_queued;
	gfp_t			sk_allocation;
	u32			sk_pacing_rate; /* bytes per second */
	u32			sk_max_pacing_rate;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	u16			sk_gso_max_segs;
	int			sk_rcvlowat;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	__u32			sk_priority;
	__u32			sk_mark;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page_frag	sk_frag;
	struct sk_buff		*sk_send_head;
	__s32			sk_peek_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
	struct rcu_head		sk_rcu;
};
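The callback pointers at the tail of struct sock (sk_state_change, sk_data_ready, sk_write_space, ...) are, as the field comments say, protected by sk_callback_lock, and sk_user_data is left free for the layer that owns the socket. Below is a minimal sketch, assuming a kernel-module context, of how an upper layer might hook sk_data_ready while preserving the original callback; the names my_sock_ctx, my_hook_data_ready and my_sock_install_hook are hypothetical and only for illustration, not part of the kernel API.

#include <net/sock.h>
#include <linux/skbuff.h>

/* Hypothetical per-socket context; not a kernel structure. */
struct my_sock_ctx {
	void (*saved_data_ready)(struct sock *sk);
};

/* Replacement for sk->sk_data_ready: look at sk_receive_queue,
 * then chain to the saved callback so normal wakeups still happen. */
static void my_hook_data_ready(struct sock *sk)
{
	struct my_sock_ctx *ctx;

	read_lock_bh(&sk->sk_callback_lock);
	ctx = sk->sk_user_data;
	if (ctx && !skb_queue_empty(&sk->sk_receive_queue))
		pr_debug("my_hook: data queued on sock %p\n", sk);
	read_unlock_bh(&sk->sk_callback_lock);

	if (ctx && ctx->saved_data_ready)
		ctx->saved_data_ready(sk);
}

/* Install the hook; sk_callback_lock serializes updates to the
 * callback pointers and sk_user_data. */
static void my_sock_install_hook(struct sock *sk, struct my_sock_ctx *ctx)
{
	write_lock_bh(&sk->sk_callback_lock);
	ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_user_data = ctx;
	sk->sk_data_ready = my_hook_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}

Note that the accessor macros at the top of the struct are plain aliases: sk->sk_state, sk->sk_refcnt and the rest simply expand to fields of __sk_common (for example, sk_state reads __sk_common.skc_state), which is why code throughout the kernel can use the short names directly.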