 #include "absl/container/flat_hash_map.h"
 #include "absl/log/absl_check.h"
 #include "absl/strings/string_view.h"
+#ifdef Py_GIL_DISABLED
+// Only include mutex for free-threaded builds
 #include "absl/synchronization/mutex.h"
+#endif
 #include "google/protobuf/descriptor.h"
 #include "google/protobuf/dynamic_message.h"
 #include "google/protobuf/internal_feature_helper.h"
@@ -73,14 +76,61 @@ namespace google {
 namespace protobuf {
 namespace python {
 
+// Zero-cost mutex wrapper that compiles away to nothing in GIL-enabled builds.
+// Similar to nanobind's ft_mutex pattern.
+class FreeThreadingMutex {
+ public:
+  FreeThreadingMutex() = default;
+  explicit constexpr FreeThreadingMutex(absl::ConstInitType)
+#ifdef Py_GIL_DISABLED
+      : mutex_(absl::kConstInit)
+#endif
+  {
+  }
+  FreeThreadingMutex(const FreeThreadingMutex&) = delete;
+  FreeThreadingMutex& operator=(const FreeThreadingMutex&) = delete;
+
+#ifndef Py_GIL_DISABLED
+  // GIL-enabled build: no-op mutex (zero cost)
+  void Lock() {}
+  void Unlock() {}
+#else
+  // Free-threaded build: real mutex
+  void Lock() { mutex_.Lock(); }
+  void Unlock() { mutex_.Unlock(); }
+
+ private:
+  absl::Mutex mutex_;
+#endif
+};
+
+// RAII lock guard for FreeThreadingMutex
+class FreeThreadingLockGuard {
+ public:
+  explicit FreeThreadingLockGuard(FreeThreadingMutex& mutex) : mutex_(mutex) {
+    mutex_.Lock();
+  }
+  ~FreeThreadingLockGuard() { mutex_.Unlock(); }
+
+  FreeThreadingLockGuard(const FreeThreadingLockGuard&) = delete;
+  FreeThreadingLockGuard& operator=(const FreeThreadingLockGuard&) = delete;
+
+ private:
+  FreeThreadingMutex& mutex_;
+};
+
 // Store interned descriptors, so that the same C++ descriptor yields the same
 // Python object. Objects are not immortal: this map does not own the
 // references, and items are deleted when the last reference to the object is
 // released.
 // This is enough to support the "is" operator on live objects.
 // All descriptors are stored here.
 absl::flat_hash_map<const void*, PyObject*>* interned_descriptors;
-absl::Mutex interned_descriptors_lock(absl::kConstInit);
+
+// Mutex to protect interned_descriptors from concurrent access in
+// free-threading Python builds. Zero-cost in GIL-enabled builds.
+// NOTE: Free-threading support is still experimental.
+FreeThreadingMutex interned_descriptors_mutex(absl::kConstInit);
 
 PyObject* PyString_FromCppString(absl::string_view str) {
   return PyUnicode_FromStringAndSize(str.data(),
@@ -402,25 +452,29 @@ PyObject* NewInternedDescriptor(PyTypeObject* type,
     return nullptr;
   }
 
-  absl::MutexLock lock(&interned_descriptors_lock);
   // See if the object is in the map of interned descriptors
-  auto it = interned_descriptors->find(descriptor);
-  if (it != interned_descriptors->end()) {
-    ABSL_DCHECK(Py_TYPE(it->second) == type);
-    Py_INCREF(it->second);
-    return it->second;
+  PyObject* existing = nullptr;
+  {
+    FreeThreadingLockGuard lock(interned_descriptors_mutex);
+    auto it = interned_descriptors->find(descriptor);
+    if (it != interned_descriptors->end()) {
+      ABSL_DCHECK(Py_TYPE(it->second) == type);
+      existing = it->second;
+    }
+  }
+  // Py_INCREF must be called outside the lock to avoid deadlock
+  if (existing != nullptr) {
+    Py_INCREF(existing);
+    return existing;
   }
+
   // Create a new descriptor object
   PyBaseDescriptor* py_descriptor = PyObject_GC_New(PyBaseDescriptor, type);
   if (py_descriptor == nullptr) {
     return nullptr;
   }
   py_descriptor->descriptor = descriptor;
 
-  // and cache it.
-  interned_descriptors->insert(
-      std::make_pair(descriptor, reinterpret_cast<PyObject*>(py_descriptor)));
-
   // Ensures that the DescriptorPool stays alive.
   PyDescriptorPool* pool =
       GetDescriptorPool_FromPool(GetFileDescriptor(descriptor)->pool());
@@ -434,6 +488,26 @@ PyObject* NewInternedDescriptor(PyTypeObject* type,
 
   PyObject_GC_Track(py_descriptor);
 
+  // Cache the fully initialized descriptor.
+  // Check again if another thread cached it while we were initializing.
+  {
+    FreeThreadingLockGuard lock(interned_descriptors_mutex);
+    auto [it, inserted] = interned_descriptors->insert(
+        std::make_pair(descriptor, reinterpret_cast<PyObject*>(py_descriptor)));
+    if (!inserted) {
+      // Another thread beat us to it. Use the existing descriptor.
+      ABSL_DCHECK(Py_TYPE(it->second) == type);
+      existing = it->second;
+    }
+  }
+
+  // If another thread cached first, clean up our descriptor and use theirs
+  if (existing != nullptr) {
+    Py_DECREF(py_descriptor);
+    Py_INCREF(existing);
+    return existing;
+  }
+
   if (was_created) {
     *was_created = true;
   }
@@ -442,13 +516,11 @@ PyObject* NewInternedDescriptor(PyTypeObject* type,
 
 static void Dealloc(PyObject* pself) {
   PyBaseDescriptor* self = reinterpret_cast<PyBaseDescriptor*>(pself);
-
+  // Remove from interned dictionary
   {
-    absl::MutexLock mu(&interned_descriptors_lock);
-    // Remove from interned dictionary
+    FreeThreadingLockGuard lock(interned_descriptors_mutex);
     interned_descriptors->erase(self->descriptor);
   }
-
   Py_CLEAR(self->pool);
   PyObject_GC_UnTrack(pself);
   Py_TYPE(self)->tp_free(pself);
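
For reference, a minimal standalone sketch of how the FreeThreadingMutex / FreeThreadingLockGuard pair added above is meant to be used around a shared table, mirroring the lookup pattern in NewInternedDescriptor. This is illustrative only and not part of the commit; g_cache, g_cache_mutex, and LookupEntry are hypothetical names, and the sketch assumes the two class definitions from the diff above are visible.

#include <Python.h>

#include "absl/base/const_init.h"
#include "absl/container/flat_hash_map.h"

namespace {

// Hypothetical shared cache guarded by the wrapper mutex. In a GIL-enabled
// build the guard's Lock()/Unlock() are no-ops; in a free-threaded build they
// take the underlying absl::Mutex.
FreeThreadingMutex g_cache_mutex(absl::kConstInit);
absl::flat_hash_map<const void*, PyObject*>* g_cache = nullptr;

PyObject* LookupEntry(const void* key) {
  PyObject* found = nullptr;
  {
    FreeThreadingLockGuard lock(g_cache_mutex);
    auto it = g_cache->find(key);
    if (it != g_cache->end()) found = it->second;
  }
  // As in NewInternedDescriptor above, the refcount operation happens outside
  // the critical section.
  if (found != nullptr) Py_INCREF(found);
  return found;
}

}  // namespace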