Disabled external gits

2022-04-07 18:46:57 +02:00
parent 88cb3426ad
commit 15e7120d6d
5316 changed files with 4563444 additions and 6 deletions

Makefile

@@ -0,0 +1,46 @@
BIN := ../$(notdir $(lastword $(abspath .))).so

EXT_H := h
EXT_HPP := h hh hpp hxx h++
EXT_C := c
EXT_CXX := C cc cpp cxx c++

INCLUDE_DIR := ../include
SOURCE_DIR := .

# Expand to every file in directory $(2) whose extension is listed in $($(1))
WILD_EXT = $(strip $(foreach EXT,$($(1)),$(wildcard $(2)/*.$(EXT))))

HDRS_C := $(call WILD_EXT,EXT_H,$(INCLUDE_DIR))
HDRS_CXX := $(call WILD_EXT,EXT_HPP,$(INCLUDE_DIR))
SRCS_C := $(call WILD_EXT,EXT_C,$(SOURCE_DIR))
SRCS_CXX := $(call WILD_EXT,EXT_CXX,$(SOURCE_DIR))
OBJS := $(SRCS_C:%=%.o) $(SRCS_CXX:%=%.o)

CC := $(CC)
CCFLAGS := -Wall -Wextra -Wfatal-errors -O2 -std=c11 -fPIC -I$(INCLUDE_DIR)
CXX := $(CXX)
CXXFLAGS := -Wall -Wextra -Wfatal-errors -O2 -std=c++17 -fPIC -I$(INCLUDE_DIR)
# Link with the C++ driver if any C++ sources are present, with the C driver otherwise
LD := $(if $(SRCS_CXX),$(CXX),$(CC))
LDFLAGS := -shared
LDLIBS :=

.PHONY: build clean

build: $(BIN)

clean:
	$(RM) $(OBJS) $(BIN)

# Generate one pattern rule per C source extension (recipe lines must be tab-indented)
define BUILD_C
%.$(1).o: %.$(1) $$(HDRS_C) Makefile
	$$(CC) $$(CCFLAGS) -c -o $$@ $$<
endef
$(foreach EXT,$(EXT_C),$(eval $(call BUILD_C,$(EXT))))

# Generate one pattern rule per C++ source extension
define BUILD_CXX
%.$(1).o: %.$(1) $$(HDRS_CXX) Makefile
	$$(CXX) $$(CXXFLAGS) -c -o $$@ $$<
endef
$(foreach EXT,$(EXT_CXX),$(eval $(call BUILD_CXX,$(EXT))))

$(BIN): $(OBJS) Makefile
	$(LD) $(LDFLAGS) -o $@ $(OBJS) $(LDLIBS)

lock.c

@@ -0,0 +1,27 @@
#include "lock.h"
bool lock_init(struct lock_t* lock) {
return pthread_mutex_init(&(lock->mutex), NULL) == 0
&& pthread_cond_init(&(lock->cv), NULL) == 0;
}
void lock_cleanup(struct lock_t* lock) {
pthread_mutex_destroy(&(lock->mutex));
pthread_cond_destroy(&(lock->cv));
}
bool lock_acquire(struct lock_t* lock) {
return pthread_mutex_lock(&(lock->mutex)) == 0;
}
void lock_release(struct lock_t* lock) {
pthread_mutex_unlock(&(lock->mutex));
}
void lock_wait(struct lock_t* lock) {
pthread_cond_wait(&(lock->cv), &(lock->mutex));
}
void lock_wake_up(struct lock_t* lock) {
pthread_cond_broadcast(&(lock->cv));
}

lock.h

@@ -0,0 +1,47 @@
#pragma once
#include <pthread.h>
#include <stdbool.h>
/**
* @brief A lock that can only be taken exclusively. Unlike shared locks,
* exclusive locks have wait/wake_up capabilities.
*/
struct lock_t {
pthread_mutex_t mutex;
pthread_cond_t cv;
};
/** Initialize the given lock.
* @param lock Lock to initialize
* @return Whether the operation is a success
**/
bool lock_init(struct lock_t* lock);
/** Clean up the given lock.
* @param lock Lock to clean up
**/
void lock_cleanup(struct lock_t* lock);
/** Wait and acquire the given lock.
* @param lock Lock to acquire
* @return Whether the operation is a success
**/
bool lock_acquire(struct lock_t* lock);
/** Release the given lock.
* @param lock Lock to release
**/
void lock_release(struct lock_t* lock);
/** Wait until woken up by a signal on the given lock.
* The lock is released while waiting and acquired again before lock_wait
* returns. Exclusive lock access is enforced.
* @param lock Lock to release (until woken up) and wait on.
**/
void lock_wait(struct lock_t* lock);
/** Wake up all threads waiting on the given lock.
* @param lock Lock on which other threads are waiting.
**/
void lock_wake_up(struct lock_t* lock);
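// Illustrative sketch (not part of this header): the intended wait/wake_up
// pattern, using only the functions declared above. The function names and the
// caller-provided condition flag are invented for the example.
static inline void example_wait_until(struct lock_t* lock, bool const* condition) {
    lock_acquire(lock);
    while (!*condition)   // re-check: a waiter can be woken before the condition holds
        lock_wait(lock);  // releases the lock while blocked, re-acquires before returning
    lock_release(lock);
}
static inline void example_set_and_wake(struct lock_t* lock, bool* condition) {
    lock_acquire(lock);
    *condition = true;    // update shared state while holding the lock
    lock_wake_up(lock);   // wake every thread blocked in lock_wait on this lock
    lock_release(lock);
}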

macros.h

@@ -0,0 +1,36 @@
#include <stdbool.h>
/** Define a proposition as likely true.
* @param prop Proposition
**/
#undef likely
#ifdef __GNUC__
#define likely(prop) \
__builtin_expect((prop) ? true : false, true /* likely */)
#else
#define likely(prop) \
(prop)
#endif
/** Define a proposition as likely false.
* @param prop Proposition
**/
#undef unlikely
#ifdef __GNUC__
#define unlikely(prop) \
__builtin_expect((prop) ? true : false, false /* unlikely */)
#else
#define unlikely(prop) \
(prop)
#endif
/** Define a variable as unused.
**/
#undef unused
#ifdef __GNUC__
#define unused(variable) \
variable __attribute__((unused))
#else
// Keep the parameter name so signatures remain valid C when GCC attributes are unavailable
#define unused(variable) \
variable
#warning This compiler has no support for GCC attributes
#endif
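// Illustrative sketch (not part of this header): how the annotations above are
// meant to be used. The function and its parameters are invented for the example.
static inline int example_checked_div(int num, int den, int unused(debug_level)) {
    if (unlikely(den == 0)) // error path, hinted to the compiler as rare
        return 0;
    return num / den;
}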

shared-lock.c

@@ -0,0 +1,25 @@
#include "shared-lock.h"
bool shared_lock_init(struct shared_lock_t* lock) {
return pthread_rwlock_init(&lock->rwlock, NULL) == 0;
}
void shared_lock_cleanup(struct shared_lock_t* lock) {
pthread_rwlock_destroy(&lock->rwlock);
}
bool shared_lock_acquire(struct shared_lock_t* lock) {
return pthread_rwlock_wrlock(&lock->rwlock) == 0;
}
void shared_lock_release(struct shared_lock_t* lock) {
pthread_rwlock_unlock(&lock->rwlock);
}
bool shared_lock_acquire_shared(struct shared_lock_t* lock) {
return pthread_rwlock_rdlock(&lock->rwlock) == 0;
}
void shared_lock_release_shared(struct shared_lock_t* lock) {
pthread_rwlock_unlock(&lock->rwlock);
}

shared-lock.h

@@ -0,0 +1,53 @@
#pragma once
// Requested feature: pthread_rwlock_t
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_XOPEN2K
#define __USE_XOPEN2K
#endif
#include <pthread.h>
#include <stdbool.h>
/**
* @brief A lock that can be taken either exclusively or shared. Unlike
* exclusive locks, shared locks do not have wait/wake_up capabilities.
*/
struct shared_lock_t {
pthread_rwlock_t rwlock;
};
/** Initialize the given lock.
* @param lock Lock to initialize
* @return Whether the operation is a success
**/
bool shared_lock_init(struct shared_lock_t* lock);
/** Clean up the given lock.
* @param lock Lock to clean up
**/
void shared_lock_cleanup(struct shared_lock_t* lock);
/** Wait and acquire the given lock exclusively.
* @param lock Lock to acquire
* @return Whether the operation is a success
**/
bool shared_lock_acquire(struct shared_lock_t* lock);
/** Release the given lock that has been taken exclusively.
* @param lock Lock to release
**/
void shared_lock_release(struct shared_lock_t* lock);
/** Wait and acquire the given lock non-exclusively.
* @param lock Lock to acquire
* @return Whether the operation is a success
**/
bool shared_lock_acquire_shared(struct shared_lock_t* lock);
/** Release the given lock that has been taken non-exclusively.
* @param lock Lock to release
**/
void shared_lock_release_shared(struct shared_lock_t* lock);
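// Illustrative sketch (not part of this header): readers share the lock while a
// writer takes it exclusively. The counter and the function names are invented.
static inline int example_read_counter(struct shared_lock_t* lock, int const* counter) {
    shared_lock_acquire_shared(lock);   // any number of readers may hold this at once
    int value = *counter;
    shared_lock_release_shared(lock);
    return value;
}
static inline void example_increment_counter(struct shared_lock_t* lock, int* counter) {
    shared_lock_acquire(lock);          // excludes readers and other writers
    *counter += 1;
    shared_lock_release(lock);
}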

tm.c

@@ -0,0 +1,198 @@
/**
* @file tm.c
* @author Sébastien Rouault <sebastien.rouault@epfl.ch>
* @author Antoine Murat <antoine.murat@epfl.ch>
*
* @section LICENSE
*
* Copyright © 2018-2021 Sébastien Rouault.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* any later version. Please see https://gnu.org/licenses/gpl.html
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* @section DESCRIPTION
*
* Lock-based transaction manager implementation used as the reference.
**/
// Requested feature: posix_memalign
#define _POSIX_C_SOURCE 200809L
// External headers
#include <stddef.h>
#include <stdint.h> // uintptr_t, UINTPTR_MAX
#include <stdlib.h>
#include <string.h>
// Internal headers
#include <tm.h>
#include "macros.h"
#include "shared-lock.h"
static const tx_t read_only_tx = UINTPTR_MAX - 10;
static const tx_t read_write_tx = UINTPTR_MAX - 11;
/**
* @brief List of dynamically allocated segments.
*/
struct segment_node {
struct segment_node* prev;
struct segment_node* next;
// uint8_t segment[] // segment of dynamic size
};
typedef struct segment_node* segment_list;
/**
* @brief Simple Shared Memory Region (a.k.a Transactional Memory).
*/
struct region {
struct shared_lock_t lock; // Global (coarse-grained) lock
void* start; // Start of the shared memory region (i.e., of the non-deallocable memory segment)
segment_list allocs; // Shared memory segments dynamically allocated via tm_alloc within transactions
size_t size; // Size of the non-deallocable memory segment (in bytes)
size_t align; // Size of a word in the shared memory region (in bytes)
};
shared_t tm_create(size_t size, size_t align) {
struct region* region = (struct region*) malloc(sizeof(struct region));
if (unlikely(!region)) {
return invalid_shared;
}
// We allocate the shared memory buffer such that its words are correctly
// aligned.
if (posix_memalign(&(region->start), align, size) != 0) {
free(region);
return invalid_shared;
}
if (!shared_lock_init(&(region->lock))) {
free(region->start);
free(region);
return invalid_shared;
}
memset(region->start, 0, size);
region->allocs = NULL;
region->size = size;
region->align = align;
return region;
}
void tm_destroy(shared_t shared) {
// Note: To be compatible with any implementation, shared_t is defined as a
// void*. For this particular implementation, the "real" type of a shared_t
// is a struct region*.
struct region* region = (struct region*) shared;
while (region->allocs) { // Free allocated segments
segment_list tail = region->allocs->next;
free(region->allocs);
region->allocs = tail;
}
free(region->start);
shared_lock_cleanup(&(region->lock));
free(region);
}
// Note: In this particular implementation, tm_start returns a valid virtual
// address (i.e., shared memory locations are virtually addressed).
// This is NOT required. Indeed, as the content of shared memory is only ever
// accessed via tm functions (read/write/free), you can use any naming scheme
// you want to designate a word within the transactional memory as long as it
// fits in a void*. Said functions will need to translate from a void* to a
// specific word. Moreover, your naming scheme should support pointer arithmetic
// (i.e., one should be able to pass tm_start(shared)+align*n to access the
// (n+1)-th word within a memory region).
// You can assume that pointers are 64 bits wide (sizeof(void*) == 8) and that
// the maximum size ever allocated will be 2^48 bytes.
void* tm_start(shared_t shared) {
return ((struct region*) shared)->start;
}
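// Illustrative sketch (not part of the reference): following the note above,
// the (n+1)-th word of the non-deallocable segment is addressed with plain
// pointer arithmetic on tm_start. Assumes n is in range, buf holds at least
// tm_align(shared) bytes, and tx was obtained from tm_begin.
bool example_read_nth_word(shared_t shared, tx_t tx, size_t n, void* buf) {
    size_t align = tm_align(shared);                          // size of one word in bytes
    void const* word = (char*) tm_start(shared) + n * align;  // address of the word at index n
    return tm_read(shared, tx, word, align, buf);             // copy it into private memory
}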
size_t tm_size(shared_t shared) {
return ((struct region*) shared)->size;
}
size_t tm_align(shared_t shared) {
return ((struct region*) shared)->align;
}
tx_t tm_begin(shared_t shared, bool is_ro) {
// We let read-only transactions run in parallel by acquiring a shared
// access. On the other hand, read-write transactions acquire an exclusive
// access. At any point in time, the lock can be shared between any number
// of read-only transactions or held by a single read-write transaction.
if (is_ro) {
// Note: "unlikely" is a macro that helps branch prediction.
// It tells the compiler (GCC) that the condition is unlikely to be true
// and to optimize the code with this additional knowledge.
// It of course penalizes executions in which the condition turns out to
// be true.
if (unlikely(!shared_lock_acquire_shared(&(((struct region*) shared)->lock))))
return invalid_tx;
return read_only_tx;
} else {
if (unlikely(!shared_lock_acquire(&(((struct region*) shared)->lock))))
return invalid_tx;
return read_write_tx;
}
}
bool tm_end(shared_t shared, tx_t tx) {
if (tx == read_only_tx) {
shared_lock_release_shared(&(((struct region*) shared)->lock));
} else {
shared_lock_release(&(((struct region*) shared)->lock));
}
return true;
}
// Note: "unused" is a macro that tells the compiler that a variable is unused.
bool tm_read(shared_t unused(shared), tx_t unused(tx), void const* source, size_t size, void* target) {
memcpy(target, source, size);
return true;
}
bool tm_write(shared_t unused(shared), tx_t unused(tx), void const* source, size_t size, void* target) {
memcpy(target, source, size);
return true;
}
alloc_t tm_alloc(shared_t shared, tx_t unused(tx), size_t size, void** target) {
// We allocate the dynamic segment such that its words are correctly
// aligned. Moreover, the alignment of the 'next' and 'prev' pointers must
// be satisfied. Thus, we align on max(align, sizeof(struct segment_node*)).
size_t align = ((struct region*) shared)->align;
align = align < sizeof(struct segment_node*) ? sizeof(void*) : align;
struct segment_node* sn;
if (unlikely(posix_memalign((void**)&sn, align, sizeof(struct segment_node) + size) != 0)) // Allocation failed
return nomem_alloc;
// Insert in the linked list
sn->prev = NULL;
sn->next = ((struct region*) shared)->allocs;
if (sn->next) sn->next->prev = sn;
((struct region*) shared)->allocs = sn;
void* segment = (void*) ((uintptr_t) sn + sizeof(struct segment_node));
memset(segment, 0, size);
*target = segment;
return success_alloc;
}
bool tm_free(shared_t shared, tx_t unused(tx), void* segment) {
struct segment_node* sn = (struct segment_node*) ((uintptr_t) segment - sizeof(struct segment_node));
// Remove from the linked list
if (sn->prev) sn->prev->next = sn->next;
else ((struct region*) shared)->allocs = sn->next;
if (sn->next) sn->next->prev = sn->prev;
free(sn);
return true;
}
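// Illustrative end-to-end sketch (not part of the reference): how a caller
// drives the API above. The function name, region size, word size and the
// counter interpretation are assumptions made only for this example.
void example_increment_first_word(void) {
    shared_t shared = tm_create(64, 8);           // 64-byte region made of 8-byte words
    if (shared == invalid_shared)
        return;
    void* start = tm_start(shared);
    tx_t tx = tm_begin(shared, false);            // read-write transaction
    if (tx != invalid_tx) {
        uint64_t counter;
        tm_read(shared, tx, start, 8, &counter);  // copy the first shared word to private memory
        counter += 1;
        tm_write(shared, tx, &counter, 8, start); // write the updated value back
        tm_end(shared, tx);                       // commit; this implementation never aborts
    }
    tm_destroy(shared);
}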