@@ -1635,16 +1635,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 		__sk_mem_reclaim(sk, SK_RECLAIM_CHUNK);
 }
 
-static inline void sock_release_ownership(struct sock *sk)
-{
-	if (sk->sk_lock.owned) {
-		sk->sk_lock.owned = 0;
-
-		/* The sk_lock has mutex_unlock() semantics: */
-		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
-	}
-}
-
 /*
  * Macro so as to not evaluate some arguments when
  * lockdep is not enabled.
@@ -1771,12 +1761,23 @@ static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
 	return sk->sk_lock.owned;
 }
 
+static inline void sock_release_ownership(struct sock *sk)
+{
+	if (sock_owned_by_user_nocheck(sk)) {
+		sk->sk_lock.owned = 0;
+
+		/* The sk_lock has mutex_unlock() semantics: */
+		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
+	}
+}
+
 /* no reclassification while locks are held */
 static inline bool sock_allow_reclassification(const struct sock *csk)
 {
 	struct sock *sk = (struct sock *)csk;
 
-	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+	return !sock_owned_by_user_nocheck(sk) &&
+	       !spin_is_locked(&sk->sk_lock.slock);
 }
 
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
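
Note: the change is purely a move-and-refactor. `sock_release_ownership()` is relocated below `sock_owned_by_user_nocheck()` so it can reuse that helper instead of open-coding the `sk->sk_lock.owned` test, and `sock_allow_reclassification()` is converted to the same helper. As a rough illustration of the pattern, here is a minimal standalone C sketch; the stand-in struct layouts, the omission of lockdep annotations, and `main()` are all invented for the example and are not the kernel's actual definitions:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel's socket_lock_t / struct sock. */
    struct socket_lock {
    	bool owned;	/* set while a process owns the socket lock */
    };

    struct sock {
    	struct socket_lock sk_lock;
    };

    /* Single accessor for the raw flag, mirroring sock_owned_by_user_nocheck(). */
    static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
    {
    	return sk->sk_lock.owned;
    }

    /*
     * After the patch, release goes through the accessor instead of
     * reading sk->sk_lock.owned directly; the real function's
     * mutex_release() lockdep call is omitted in this sketch.
     */
    static inline void sock_release_ownership(struct sock *sk)
    {
    	if (sock_owned_by_user_nocheck(sk))
    		sk->sk_lock.owned = false;
    }

    int main(void)
    {
    	struct sock s = { .sk_lock = { .owned = true } };

    	sock_release_ownership(&s);
    	printf("owned after release: %d\n", sock_owned_by_user_nocheck(&s));
    	return 0;
    }

The move is required because both converted functions are `static inline` in the same header, so `sock_owned_by_user_nocheck()` must be defined before its first use.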